For accompanying slides (keynote and pdf), see: https://osf.io/p8amv/

Demonstrating Type M error

Low power situation

## Simulate repeated one-sample experiments under low power (power = .10)
## and high power (power = .80) to demonstrate Type M (magnitude) error.
set.seed(987654321)
d <- 20    # true effect in ms
sd <- 150  # population SD in ms (calls to sd() below still resolve to stats::sd)
## power.t.test()'s effect-size argument is `delta` (the original `d =` only
## worked via partial argument matching). Its returned n is fractional, so
## round up to a whole-number sample size; the original passed the fractional
## n to rnorm() (silently truncated) while using it un-rounded for the df.
lown <- ceiling(power.t.test(delta = d, sd = sd, power = .10,
                             type = "one.sample",
                             alternative = "two.sided", strict = TRUE)$n)
highn <- ceiling(power.t.test(delta = d, sd = sd, power = .80,
                              type = "one.sample",
                              alternative = "two.sided", strict = TRUE)$n)
nsim <- 50
## Preallocate result vectors instead of growing NULLs inside the loop.
tlow <- thigh <- meanslow <- meanshigh <- numeric(nsim)
CIuplow <- CIlwlow <- CIuphigh <- CIlwhigh <- numeric(nsim)
## Two-sided critical t values at alpha = 0.05 (positive by construction).
critlow <- qt(0.975, df = lown - 1)
crithigh <- qt(0.975, df = highn - 1)

for (i in seq_len(nsim)) {
  ## One low-power experiment: record mean, t statistic, and 95% CI.
  x <- rnorm(lown, mean = d, sd = sd)
  meanslow[i] <- mean(x)
  tlow[i] <- t.test(x)$statistic
  se <- sd(x) / sqrt(length(x))
  CIuplow[i] <- mean(x) + critlow * se
  CIlwlow[i] <- mean(x) - critlow * se
  ## One high-power experiment, same summaries.
  x <- rnorm(highn, mean = d, sd = sd)
  meanshigh[i] <- mean(x)
  thigh[i] <- t.test(x)$statistic
  se <- sd(x) / sqrt(length(x))
  CIuphigh[i] <- mean(x) + crithigh * se
  CIlwhigh[i] <- mean(x) - crithigh * se
}

 
## Label each simulated experiment by significance at alpha = 0.05.
## critlow/crithigh are positive critical values, so no abs() is needed on them.
siglow <- ifelse(abs(tlow) > critlow, "p<0.05", "p>0.05")
sighigh <- ifelse(abs(thigh) > crithigh, "p<0.05", "p>0.05")

## Build both summary data frames the same way; the `index` column is added
## only after sorting (the original gave summaryhigh an index up front that
## was immediately overwritten below).
summarylow <- data.frame(means = meanslow, significance = siglow,
                         CIupper = CIuplow, CIlower = CIlwlow)
summaryhigh <- data.frame(means = meanshigh, significance = sighigh,
                          CIupper = CIuphigh, CIlower = CIlwhigh)

## Re-order rows by mean effect size; `index` then gives the plotting
## (and animation-frame) order.
summarylow <- summarylow[order(summarylow$means), ]
summarylow$index <- seq_len(nrow(summarylow))
summaryhigh <- summaryhigh[order(summaryhigh$means), ]
summaryhigh$index <- seq_len(nrow(summaryhigh))

## Dot-and-CI plot for the low-power simulations, ordered by sample mean.
## Shapes distinguish significant from non-significant samples
## (scale_shape_manual: triangle = first level, filled circle = second).
p_low <- ggplot(summarylow, aes(y = means, x = index,
                                shape = significance,
                                ymax = CIupper, ymin = CIlower)) +
  geom_pointrange() +
  geom_point(size = 2.5) +        # enlarge the points drawn by geom_pointrange
  scale_shape_manual(values = c(2, 19)) +
  geom_hline(yintercept = d) +    # true effect (was hard-coded 20; p_hi uses d)
  theme_bw() +
  scale_x_continuous(name = "Sample id") +
  scale_y_continuous(name = "means", limits = c(-200, 200)) +
  labs(title = "Effect 20 ms, SD 150, \n n=25, power=0.10") +
  theme(legend.position = c(0.8, 0.3)) +
  geom_hline(yintercept = 0, linetype = "dotted") +
  magnifytext(sze = 16)



## Animate the low-power plot, one sample per frame, leaving a trail of
## previously shown samples.
p_low_anim <- p_low + transition_time(index) + enter_fade() + exit_fade() + shadow_trail()

p_low_anim

## Render once through (no looping) when saving.
altrenderer <- gifski_renderer(loop = FALSE)

## Save with fully named arguments: the original call relied on `file`
## partially matching anim_save()'s `filename` parameter and on the
## animation binding positionally.
anim_save(filename = "plownanim.gif", animation = p_low_anim,
          renderer = altrenderer)

High power situation

## Dot-and-CI plot for the high-power simulations (same layout as p_low).
p_hi <- ggplot(summaryhigh, aes(y = means, x = index,
                                shape = significance,
                                ymax = CIupper, ymin = CIlower)) +
  geom_pointrange() +
  geom_point(size = 2.5) +        # enlarge the points drawn by geom_pointrange
  scale_shape_manual(values = c(2, 19)) +
  scale_x_continuous(name = "Sample id") +
  geom_hline(yintercept = d) +    # true effect
  theme_bw() +
  scale_y_continuous(name = "means", limits = c(-200, 200)) +
  labs(title = "Effect 20 ms, SD 150, \n n=350, power=0.80") +
  theme(legend.position = c(0.8, 0.3)) +
  geom_hline(yintercept = 0, linetype = "dotted") +
  magnifytext(sze = 16)

## Animate, one sample per frame, with a trail of previous samples.
p_hi_anim <- p_hi + transition_time(index) + enter_fade() + exit_fade() + shadow_trail()

## Fully named arguments (the original relied on partial matching of
## `file` to anim_save()'s `filename`).
anim_save(filename = "phianim.gif", animation = p_hi_anim,
          renderer = altrenderer)

Confidence intervals plot

library(rstan)
## Loading required package: StanHeaders
## rstan (Version 2.19.2, GitRev: 2e1f913d3ca3)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
## One estimate + standard error per study, passed to the random-effects
## meta-analysis model (rema.stan).
load("data/lmer_estimates2.Rda")
dat <- list(N = nrow(lmer_estimates2),
            y = lmer_estimates2$Estimate,
            sigma = lmer_estimates2$Std..Error)

## The original settings (iter = 2000, adapt_delta = 0.8) produced 140
## divergent transitions, R-hat = 1.05, and low bulk/tail ESS (see the
## recorded warnings below); raise adapt_delta and run longer chains,
## exactly as those warnings recommend.
fit <- stan(file = 'StanModels/rema.stan', data = dat,
            iter = 4000, chains = 4, seed = 987654321,
            control = list(adapt_delta = 0.99))
## 
## SAMPLING FOR MODEL 'rema' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 1.3e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.067248 seconds (Warm-up)
## Chain 1:                0.063114 seconds (Sampling)
## Chain 1:                0.130362 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'rema' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 3e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.061325 seconds (Warm-up)
## Chain 2:                0.051304 seconds (Sampling)
## Chain 2:                0.112629 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'rema' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 6e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.072621 seconds (Warm-up)
## Chain 3:                0.053468 seconds (Sampling)
## Chain 3:                0.126089 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'rema' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 5e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.05 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.055006 seconds (Warm-up)
## Chain 4:                0.067974 seconds (Sampling)
## Chain 4:                0.12298 seconds (Total)
## Chain 4:
## Warning: There were 140 divergent transitions after warmup. Increasing adapt_delta above 0.8 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## Warning: Examine the pairs() plot to diagnose sampling problems
## Warning: The largest R-hat is 1.05, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## http://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## http://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## http://mc-stan.org/misc/warnings.html#tail-ess
## Extract posterior draws for the meta-analytic mean (theta) and the
## between-study sd (tau), then summarise theta by its posterior mean
## and central 95% interval.
paramnames <- c("theta", "tau")
#print(fit,pars=paramnames)

params <- extract(fit, pars = paramnames)
#str(params)

theta_draws <- params$theta
mean_theta <- mean(theta_draws)
theta_ci <- quantile(theta_draws, probs = c(0.025, 0.975))
lower_theta <- theta_ci[1]
upper_theta <- theta_ci[2]

## Forest-style plot: per-study estimates with 95% CIs, plus a shaded band
## showing the 95% interval of the meta-analytic mean (theta).
load("data/lmer_estimates3.Rda")
## Fix the study order so the x axis follows the data's row order.
lmer_estimates3$id <- factor(lmer_estimates3$id, levels = lmer_estimates3$id)
pd <- position_dodge(0.6)
p_lmer <- ggplot(lmer_estimates3, aes(x = id,
                                      y = Estimate, group = id)) +
    geom_errorbar(aes(ymin = lower, ymax = upper),
                  width = .25, size = .5, position = pd) +
    ## Shaded band for theta's 95% interval. The original had the bounds
    ## inverted (ymin = upper_theta, ymax = lower_theta); corrected here.
    annotate("rect",
             xmin = 0,
             xmax = 11,
             ymin = lower_theta,
             ymax = upper_theta,
             color = "black", alpha = 0.2) +
    labs(title = "Agreement attraction across 10 studies") +
    xlab("Study id") +
    ylab("Estimate (log ms)") +
    geom_hline(yintercept = 0, col = "gray") +
    geom_point(position = pd, size = 2) +
    theme_bw() +
    magnifytext()

## Animate the forest plot study by study, trailing earlier studies.
p_lmer_anim <- p_lmer + transition_time(as.numeric(id)) +
  enter_fade() + exit_fade() + shadow_trail()

p_lmer_anim

## Fully named arguments (the original relied on partial matching of
## `file` to anim_save()'s `filename`).
anim_save(filename = "plmeranim.gif",
          animation = p_lmer_anim,
          renderer = altrenderer)

Meta analysis plot

## Combine the posterior samples from the original studies with the
## Dillon replication (expt 11), relabelled "repl", for a joint plot.
load("data/data_model.Rda")
load("data/data_model_dillonrep.Rda")
## Central 95% interval of the meta-analytic ("model") posterior.
## NOTE(review): modelquantiles is not used in this chunk — presumably
## referenced later in the document; confirm before removing.
modelquantiles<-quantile(subset(data_model,expt=="model")$posterior,prob=c(0.025,0.975))

## Pull out the replication study and relabel its expt factor.
expt_dillonrep<-subset(data_model_dillonrep,expt==11)
#head(expt_dillonrep)
expt_dillonrep$expt<-factor("repl")
## Stack the original studies and the replication for plotting.
data_model11studies<-rbind(data_model,expt_dillonrep)

## Ridgeline plot of the posterior distribution for each experiment,
## flipped so the posteriors run along the vertical axis.
## NOTE(review): `..density..` is the pre-after_stat() spelling — fine for
## the ggplot2 version recorded here, but worth modernising if upgraded.
scl<-1
p_stan<-ggplot(data_model11studies, 
       aes(x = posterior, y = factor(expt),height = ..density..
           )) + coord_flip()+
  geom_density_ridges(scale = scl
                      ,stat = "density",
                      rel_min_height = 0.01) +
  scale_y_discrete(expand = c(0.01, 0)) +
  scale_x_continuous(expand = c(0.01, 0)) +
  #scale_fill_brewer(palette = "PuBuGn") +
  theme_ridges() + theme(legend.position = "none")+
  xlab("agreement attraction effect")+
  ylab("expt")+
  geom_vline(xintercept=0,col="gray")+
  ## meta-analysis based on frequentist estimates
  ## NOTE(review): -9 and -36 are hard-coded interval bounds — confirm they
  ## match the frequentist meta-analysis they are meant to represent.
  geom_vline(xintercept=-9)+
  geom_vline(xintercept=-36)+
    magnifytext(sze=14)

## Animate the ridgeline plot across experiments.
p_stan_anim <- p_stan + transition_states(as.numeric(expt)) +
  enter_fade() +
  exit_fade() +
  shadow_trail()

p_stan_anim

#animate(p_stan_anim, nframes = 100,
#        fps=5,
#        rewind = FALSE,
#        start_pause = 1)


## Fully named arguments (the original relied on partial matching of
## `file` to anim_save()'s `filename`).
anim_save(filename = "pstananim.gif", animation = p_stan_anim,
          renderer = altrenderer)

Further reading